#!pip install piexif
!pip install keract
Collecting keract Using cached keract-4.5.0-py2.py3-none-any.whl (12 kB) Installing collected packages: keract Successfully installed keract-4.5.0
%load_ext autoreload
%autoreload
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.image as mpimg
from PIL import Image
import piexif
import os
import errno
import shutil
from tensorflow.keras.preprocessing.image import smart_resize
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import Callback
import keract
from utils import join_path, rotate_image, plot_transforms, join_path_list
import matplotlib.pyplot as plt
plt.style.use('dark_background')
The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload
#from google.colab import drive
#drive.mount('/content/drive')
def join_path(path_1='', path_2=''):
    """Join two path segments and normalize all separators to forward slashes."""
    joined = os.path.join(path_1, path_2)
    return joined.replace('\\', '/')
def create_folder(path='', folder_name=''):
try:
os.mkdir(os.path.join(path, folder_name).replace('\\', '/'))
except OSError as e:
if e.errno==errno.EEXIST:
print('Dierctory not created, already exists')
else:
raise
# --- Path setup and dataset folder scaffolding -------------------------------
# Local Windows project root; the commented-out path below is the Colab/Drive
# equivalent used when running in the cloud.
project_path = r'C:\Users\jan\projects\tymbark_ase'
#project_path = r'\content\drive\My Drive\projects\tymbark_ase'
project_path = join_path(project_path)  # normalize '\' -> '/'
raw_data = 'original_data'
raw_data_path = join_path(project_path, raw_data)
modeling_data_dir = 'modeling_data'
training_dir = 'train'
validation_dir = 'validation'
modeling_data_path = join_path(project_path, modeling_data_dir)
training_path = join_path(modeling_data_path, training_dir)
validation_path = join_path(modeling_data_path, validation_dir)
# Class labels are the subfolder names of the raw data directory.
classes = os.listdir(raw_data_path)
# Rebuild the modeling_data tree from scratch on every run.
try:
    shutil.rmtree(modeling_data_path)
except FileNotFoundError:
    pass
create_folder(project_path, 'modeling_data')
create_folder(modeling_data_path, training_dir)
create_folder(modeling_data_path, validation_dir)
# One subfolder per class label in both the train and validation splits.
for dataset in [training_path, validation_path]:
    for label in classes:
        create_folder(dataset, label)
def split(files_dir='', split_frac=0.8, seed=None):
examples = pd.Series(os.listdir(files_dir))
train = examples.sample(frac=split_frac, random_state=seed).to_list()
val = examples.loc[~examples.isin(train)].to_list()
return train, val
def join_path_list(path='', list_of_paths=None):
    """Prefix every name in `list_of_paths` with `path` (forward-slash joined).

    Fix: mutable default argument `[]` replaced with None.
    """
    if list_of_paths is None:
        list_of_paths = []
    return [join_path(path, name) for name in list_of_paths]
def copy_file_list(list_of_paths=[], dest_dir=''):
for file in list_of_paths: shutil.copy2(file, dest_dir)
def copy_split(classes_folders_path='', train_dir='',
               val_dir='', split_frac=0.8, seed=None):
    """Split each class folder under `classes_folders_path` and copy the files
    into the matching per-class subfolders of `train_dir` / `val_dir`.

    Fix: labels are derived from `classes_folders_path` itself instead of the
    module-level `classes` global (the parameter was otherwise half-ignored);
    behavior is identical for the call site, where both coincide.
    """
    for label in os.listdir(classes_folders_path):
        class_data_path = join_path(classes_folders_path, label)
        train_fnames, val_fnames = split(class_data_path,
                                         split_frac=split_frac, seed=seed)
        copy_file_list(join_path_list(class_data_path, train_fnames),
                       join_path(train_dir, label))
        copy_file_list(join_path_list(class_data_path, val_fnames),
                       join_path(val_dir, label))
copy_split(raw_data_path, training_path, validation_path, split_frac=0.7, seed=2)
def rename_files(classes_dir_path='', dataset_name=''):
    """Rename every file in each class folder to '<dataset>_<label>_<n><ext>'.

    Fixes: the default for `dataset_name` was the `str` type object itself,
    and the extension was taken as the last four characters of the name,
    which breaks for extensions that are not exactly three letters;
    os.path.splitext is used instead (same result for .jpg/.JPG).
    """
    for label in os.listdir(classes_dir_path):
        class_dir = join_path(classes_dir_path, label)
        for file_no, file_name in enumerate(os.listdir(class_dir), 1):
            ext = os.path.splitext(file_name)[1]
            new_name = f'{dataset_name}_{label}_{file_no}{ext}'
            os.rename(join_path(class_dir, file_name),
                      join_path(class_dir, new_name))
rename_files(training_path, 'train')
rename_files(validation_path, 'val')
# Report the image count and three sample filenames per class and per split.
for label in classes:
    for dir_name, path in zip([training_dir, validation_dir],
                              [training_path, validation_path]):
        image_names = pd.Series(os.listdir(join_path(path, label)))
        no_of_images = image_names.shape[0]
        ex_names = image_names.sample(n=3).to_list()
        print(label, '|', dir_name, no_of_images, ex_names)
        #os.listdir(join_path(path, lablel))
    # Print a separating blank line between classes, but not after the last.
    if label == classes[-1]: pass
    else: print('\n')
cyt_mie | train 50 ['train_cyt_mie_4.jpg', 'train_cyt_mie_45.JPG', 'train_cyt_mie_2.jpg'] cyt_mie | validation 22 ['val_cyt_mie_12.JPG', 'val_cyt_mie_8.jpg', 'val_cyt_mie_6.jpg'] jab_arb | train 51 ['train_jab_arb_31.JPG', 'train_jab_arb_42.JPG', 'train_jab_arb_33.JPG'] jab_arb | validation 22 ['val_jab_arb_6.jpg', 'val_jab_arb_11.JPG', 'val_jab_arb_4.jpg'] jab_brz | train 50 ['train_jab_brz_22.jpg', 'train_jab_brz_6.jpg', 'train_jab_brz_16.jpg'] jab_brz | validation 22 ['val_jab_brz_5.jpg', 'val_jab_brz_20.JPG', 'val_jab_brz_17.JPG'] jab_mie | train 50 ['train_jab_mie_34.JPG', 'train_jab_mie_14.jpg', 'train_jab_mie_44.JPG'] jab_mie | validation 22 ['val_jab_mie_1.jpg', 'val_jab_mie_8.jpg', 'val_jab_mie_7.jpg'] jab_wis | train 50 ['train_jab_wis_28.JPG', 'train_jab_wis_12.jpg', 'train_jab_wis_31.JPG'] jab_wis | validation 22 ['val_jab_wis_16.JPG', 'val_jab_wis_1.jpg', 'val_jab_wis_2.jpg'] man_mie | train 50 ['train_man_mie_33.JPG', 'train_man_mie_37.JPG', 'train_man_mie_4.jpg'] man_mie | validation 22 ['val_man_mie_6.jpg', 'val_man_mie_13.JPG', 'val_man_mie_15.JPG'] pom_brz | train 50 ['train_pom_brz_47.JPG', 'train_pom_brz_28.JPG', 'train_pom_brz_22.jpg'] pom_brz | validation 22 ['val_pom_brz_3.jpg', 'val_pom_brz_16.JPG', 'val_pom_brz_7.jpg']
def chceck_exif(image):
try:
orientation = piexif.load(image.info['exif'])['0th'][piexif.ImageIFD.Orientation]
except KeyError:
orientation = 'no data'
finally:
return orientation
def count_shapes(labels_dirs_path=''):
    """Tally the distinct (width, height, mode, format, EXIF orientation)
    combinations over all images in every class folder under
    `labels_dirs_path`, returning a DataFrame with a 'count' column.

    Fixes: rows are collected in a list and turned into a DataFrame once
    (the original concatenated row-by-row, which is quadratic, and seeded
    the frame with a dummy zeros row it then had to drop); also removes the
    misspelled `iamge_info` temporary.
    """
    columns = ['width', 'height', 'channels', 'format', 'orientation']
    records = []
    for label in os.listdir(labels_dirs_path):
        label_path = join_path(labels_dirs_path, label)
        fpaths = join_path_list(label_path, os.listdir(label_path))
        for fpath in fpaths:
            image = Image.open(fpath)
            width, height = image.size
            # 'channels' actually stores the PIL mode string (e.g. 'RGB').
            records.append([width, height, image.mode, image.format,
                            chceck_exif(image)])
    shapes_df = pd.DataFrame(records, columns=columns)
    shapes_df['count'] = 1
    return shapes_df.groupby(columns).sum().reset_index()
# Summarize image shapes/formats before any preprocessing.
print('Training images info:')  # fix: was misspelled 'Traing'
display(count_shapes(training_path))
print('Validation images info:')
display(count_shapes(validation_path))
Traing images info:
| width | height | channels | format | orientation | count | |
|---|---|---|---|---|---|---|
| 0 | 3264 | 2448 | RGB | JPEG | 6 | 24 |
| 1 | 3840 | 2160 | RGB | JPEG | 6 | 173 |
| 2 | 4608 | 3456 | RGB | JPEG | 6 | 154 |
Validation images info:
| width | height | channels | format | orientation | count | |
|---|---|---|---|---|---|---|
| 0 | 3264 | 2448 | RGB | JPEG | 6 | 3 |
| 1 | 3840 | 2160 | RGB | JPEG | 6 | 97 |
| 2 | 4608 | 3456 | RGB | JPEG | 6 | 54 |
def rotate_image(image_path, rotations=3):
    """Read an image file and rotate it by `rotations` * 90 degrees
    counter-clockwise, returning the rotated array (first 3 channels only).

    Fix: a single np.rot90 over the spatial axes replaces the original
    per-channel rotate-and-dstack loop -- same result, one pass. The
    `[:, :, :3]` slice preserves the original behavior of keeping only the
    first three channels; ascontiguousarray matches dstack's contiguous output.
    """
    im = mpimg.imread(image_path)
    rotated = np.rot90(im, rotations, axes=(0, 1))[:, :, :3]
    return np.ascontiguousarray(rotated)
def plot_example_images(rows=3, samples=4, classes_dirs_path='', labels=[],
                        figsize=[14, 8], rotations=3):
    """Plot a grid with `samples` random images per label drawn from
    `classes_dirs_path`, optionally rotated by `rotations` quarter-turns.

    Fix: the original ignored `classes_dirs_path` and always read from the
    module-level `training_path`, so plotting the validation split silently
    showed training images instead.
    """
    fig, axs = plt.subplots(nrows=rows, ncols=samples, figsize=figsize)
    for row, label in enumerate(labels):
        label_path = join_path(classes_dirs_path, label)
        images_paths = pd.Series(join_path_list(
            label_path,
            os.listdir(label_path))).sample(samples).to_list()
        for image_index, image_path in enumerate(images_paths):
            im = mpimg.imread(image_path)
            # Rotate only for a valid quarter-turn count (1-3); 0 means as-is.
            if rotations in (1, 2, 3):
                im = rotate_image(image_path, rotations=rotations)
            axs[row][image_index].imshow(im)
            axs[row][image_index].axis('off')
    plt.show()
# Show raw examples, then the same grid rotated 270 degrees (the photos carry
# EXIF orientation 6, so a rotation is needed for upright display).
# Note: plot_example_images returns None, so `paths` is always None here.
paths = plot_example_images(rows=len(classes), samples=4, classes_dirs_path=training_path,
                            labels=classes, figsize=[14, 14], rotations=0)
print('Rotated images')
paths = plot_example_images(rows=len(classes), samples=4, classes_dirs_path=training_path,
                            labels=classes, figsize=[10, 20], rotations=3)
Rotated images
# Common model-input size used for resizing and for the Keras generators.
target_shape = [256, 256]
def resize_images(classes_dirs_path='', shape=target_shape):
    """Resize every image in each class folder to `shape`, overwriting the
    file in place.

    Fix: the original left the source file handle open while saving over the
    same path (Image.open(...).resize(...).save(...)); the handle is now
    closed before the overwrite.
    """
    for label in os.listdir(classes_dirs_path):
        class_dir_path = join_path(classes_dirs_path, label)
        for image_path in join_path_list(class_dir_path, os.listdir(class_dir_path)):
            with Image.open(image_path) as im:
                resized = im.resize(shape)
            resized.save(image_path)
resize_images(training_path)
resize_images(validation_path)
def rotate_and_save(classes_dirs_path='', rotations=3):
    """Rotate every image in each class folder by `rotations` * 90 degrees
    counter-clockwise and overwrite the original file in place.
    """
    for label in os.listdir(classes_dirs_path):
        label_dir = join_path(classes_dirs_path, label)
        for image_path in join_path_list(label_dir, os.listdir(label_dir)):
            rotated = rotate_image(image_path, rotations=rotations)
            Image.fromarray(rotated).save(image_path)
rotate_and_save(training_path)
rotate_and_save(validation_path)
# Re-check shapes after resizing/rotating: everything should now be 256x256
# with no EXIF orientation data.
print('Training images info:')  # fix: was misspelled 'Traing'
display(count_shapes(training_path))
print('Validation images info:')
display(count_shapes(validation_path))
Traing images info:
| width | height | channels | format | orientation | count | |
|---|---|---|---|---|---|---|
| 0 | 256 | 256 | RGB | JPEG | no data | 351 |
Validation images info:
| width | height | channels | format | orientation | count | |
|---|---|---|---|---|---|---|
| 0 | 256 | 256 | RGB | JPEG | no data | 154 |
# Visual sanity check of the preprocessed validation images (no extra rotation).
plot_example_images(rows=len(classes), samples=4, classes_dirs_path=validation_path,
                    labels=classes, figsize=[11, 19], rotations=0)
#target_shape = [256, 256]
# Training generator: rescale to [0, 1] plus geometric and photometric
# augmentation. fill_mode='constant' with cval=90 pads shifted/rotated images
# with a mid-gray value.
train_datagen = ImageDataGenerator(
    rescale=1/255,
    rotation_range=60,
    width_shift_range=0.1,
    height_shift_range=0.1,
    fill_mode='constant',
    cval=90,
    brightness_range=(0.80, 1.20),
    shear_range=10,
    zoom_range=0.1,
    channel_shift_range=30,
    zca_whitening=False,
    horizontal_flip=False,
    vertical_flip=False)
#train_datagen = ImageDataGenerator(
#    rescale=1/255)
# Validation images are only rescaled -- never augmented.
validation_datagen = ImageDataGenerator(
    rescale=1/255)
train_gen = train_datagen.flow_from_directory(
    training_path,
    target_size=target_shape,
    batch_size=27,
    class_mode='categorical')
validation_gen = validation_datagen.flow_from_directory(
    validation_path,
    target_size=target_shape,
    batch_size=22,
    class_mode='categorical')
Found 351 images belonging to 7 classes. Found 154 images belonging to 7 classes.
# CNN: four Conv -> MaxPool -> BatchNorm stages with widening filter counts
# (64 -> 512), then a dropout-regularized softmax head over the class labels.
model = Sequential()
model.add(Conv2D(64, [3, 3], activation='relu', input_shape=target_shape + [3]))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Conv2D(128, [3, 3], activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Conv2D(256, [3, 3], activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Conv2D(512, [3, 3], activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.5))
model.add(BatchNormalization())
# Earlier experiments with extra dense layers, kept for reference:
#model.add(Dense(128, activation='relu'))
#model.add(Dropout(0.5))
#model.add(BatchNormalization())
#model.add(Dense(64, activation='relu'))
#model.add(Dropout(0.5))
#model.add(BatchNormalization())
model.add(Dense(len(classes), activation='softmax'))
class MyCallback(Callback):
    """Keras callback that stops training once validation accuracy reaches
    `desired_acc`.

    Generalized: the threshold is now a constructor parameter (the original
    hard-coded `np.round(60 / 60, 2)`, i.e. 1.0, which stays the default).
    Fixes: mutable default `logs={}` replaced with None; a missing
    'val_accuracy' key no longer raises KeyError.
    """

    def __init__(self, desired_acc=1.0):
        super().__init__()
        self.desired_acc = desired_acc

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        val_acc = logs.get('val_accuracy')
        if val_acc is not None and val_acc >= self.desired_acc:
            print('Reached', self.desired_acc, 'accuracy, stopping training')
            self.model.stop_training = True
# Low learning rate to stabilize training on this small dataset.
adam_optimizer = Adam(learning_rate=0.0001)
model.compile(optimizer=adam_optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 254, 254, 64) 1792 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 127, 127, 64) 0 _________________________________________________________________ batch_normalization (BatchNo (None, 127, 127, 64) 256 _________________________________________________________________ conv2d_1 (Conv2D) (None, 125, 125, 128) 73856 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 62, 62, 128) 0 _________________________________________________________________ batch_normalization_1 (Batch (None, 62, 62, 128) 512 _________________________________________________________________ conv2d_2 (Conv2D) (None, 60, 60, 256) 295168 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 30, 30, 256) 0 _________________________________________________________________ batch_normalization_2 (Batch (None, 30, 30, 256) 1024 _________________________________________________________________ conv2d_3 (Conv2D) (None, 28, 28, 512) 1180160 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 14, 14, 512) 0 _________________________________________________________________ batch_normalization_3 (Batch (None, 14, 14, 512) 2048 _________________________________________________________________ flatten (Flatten) (None, 100352) 0 _________________________________________________________________ dropout (Dropout) (None, 100352) 0 _________________________________________________________________ batch_normalization_4 (Batch (None, 100352) 401408 _________________________________________________________________ dense (Dense) (None, 7) 702471 ================================================================= Total 
params: 2,658,695 Trainable params: 2,456,071 Non-trainable params: 202,624 _________________________________________________________________
%%time
epochs = 100
stop_train_acc = MyCallback()
history = model.fit(
train_gen, validation_data=validation_gen, epochs=epochs, verbose=2,
callbacks=[])
Epoch 1/100 13/13 - 18s - loss: 2.2527 - accuracy: 0.2507 - val_loss: 1.9538 - val_accuracy: 0.1429 Epoch 2/100 13/13 - 8s - loss: 1.5418 - accuracy: 0.4701 - val_loss: 1.9977 - val_accuracy: 0.1429 Epoch 3/100 13/13 - 8s - loss: 1.5225 - accuracy: 0.4929 - val_loss: 1.9843 - val_accuracy: 0.1429 Epoch 4/100 13/13 - 8s - loss: 1.3324 - accuracy: 0.5071 - val_loss: 2.0944 - val_accuracy: 0.1429 Epoch 5/100 13/13 - 8s - loss: 1.2089 - accuracy: 0.5698 - val_loss: 2.3713 - val_accuracy: 0.1429 Epoch 6/100 13/13 - 8s - loss: 1.0759 - accuracy: 0.6296 - val_loss: 2.8172 - val_accuracy: 0.1429 Epoch 7/100 13/13 - 8s - loss: 1.0095 - accuracy: 0.6524 - val_loss: 2.4370 - val_accuracy: 0.1429 Epoch 8/100 13/13 - 8s - loss: 0.9491 - accuracy: 0.6553 - val_loss: 2.3560 - val_accuracy: 0.1429 Epoch 9/100 13/13 - 8s - loss: 1.0008 - accuracy: 0.6182 - val_loss: 3.1387 - val_accuracy: 0.1429 Epoch 10/100 13/13 - 8s - loss: 0.8062 - accuracy: 0.7009 - val_loss: 3.3665 - val_accuracy: 0.1429 Epoch 11/100 13/13 - 8s - loss: 1.0346 - accuracy: 0.6296 - val_loss: 3.3802 - val_accuracy: 0.1429 Epoch 12/100 13/13 - 8s - loss: 0.8408 - accuracy: 0.7009 - val_loss: 3.9762 - val_accuracy: 0.1429 Epoch 13/100 13/13 - 8s - loss: 1.0554 - accuracy: 0.6667 - val_loss: 3.3933 - val_accuracy: 0.1429 Epoch 14/100 13/13 - 9s - loss: 0.7662 - accuracy: 0.7094 - val_loss: 3.5090 - val_accuracy: 0.1429 Epoch 15/100 13/13 - 8s - loss: 0.7714 - accuracy: 0.6866 - val_loss: 3.0087 - val_accuracy: 0.1688 Epoch 16/100 13/13 - 8s - loss: 0.8573 - accuracy: 0.6980 - val_loss: 4.4718 - val_accuracy: 0.1429 Epoch 17/100 13/13 - 8s - loss: 0.8571 - accuracy: 0.6923 - val_loss: 3.6340 - val_accuracy: 0.1429 Epoch 18/100 13/13 - 8s - loss: 0.8351 - accuracy: 0.7179 - val_loss: 5.0626 - val_accuracy: 0.1429 Epoch 19/100 13/13 - 8s - loss: 0.7178 - accuracy: 0.7607 - val_loss: 3.7322 - val_accuracy: 0.1429 Epoch 20/100 13/13 - 8s - loss: 0.6834 - accuracy: 0.7521 - val_loss: 3.4512 - val_accuracy: 0.1494 Epoch 
21/100 13/13 - 8s - loss: 0.7486 - accuracy: 0.7123 - val_loss: 2.8890 - val_accuracy: 0.2208 Epoch 22/100 13/13 - 8s - loss: 0.6953 - accuracy: 0.7721 - val_loss: 2.6114 - val_accuracy: 0.1753 Epoch 23/100 13/13 - 8s - loss: 0.7198 - accuracy: 0.7550 - val_loss: 4.7865 - val_accuracy: 0.1429 Epoch 24/100 13/13 - 8s - loss: 0.7571 - accuracy: 0.7635 - val_loss: 4.8425 - val_accuracy: 0.1429 Epoch 25/100 13/13 - 8s - loss: 0.8009 - accuracy: 0.7236 - val_loss: 3.9583 - val_accuracy: 0.1494 Epoch 26/100 13/13 - 8s - loss: 0.7779 - accuracy: 0.7721 - val_loss: 2.7004 - val_accuracy: 0.2792 Epoch 27/100 13/13 - 8s - loss: 0.8278 - accuracy: 0.6980 - val_loss: 2.1678 - val_accuracy: 0.3506 Epoch 28/100 13/13 - 8s - loss: 0.4821 - accuracy: 0.8462 - val_loss: 2.6452 - val_accuracy: 0.2857 Epoch 29/100 13/13 - 8s - loss: 0.6103 - accuracy: 0.7664 - val_loss: 2.0163 - val_accuracy: 0.4935 Epoch 30/100 13/13 - 8s - loss: 0.6562 - accuracy: 0.7578 - val_loss: 1.4260 - val_accuracy: 0.4286 Epoch 31/100 13/13 - 8s - loss: 0.7481 - accuracy: 0.7778 - val_loss: 2.5603 - val_accuracy: 0.3442 Epoch 32/100 13/13 - 8s - loss: 0.6288 - accuracy: 0.7835 - val_loss: 2.6176 - val_accuracy: 0.3377 Epoch 33/100 13/13 - 8s - loss: 0.4552 - accuracy: 0.8376 - val_loss: 1.9937 - val_accuracy: 0.3896 Epoch 34/100 13/13 - 8s - loss: 0.5509 - accuracy: 0.8091 - val_loss: 2.9733 - val_accuracy: 0.3377 Epoch 35/100 13/13 - 8s - loss: 0.4732 - accuracy: 0.8262 - val_loss: 2.7347 - val_accuracy: 0.3636 Epoch 36/100 13/13 - 9s - loss: 0.5777 - accuracy: 0.7892 - val_loss: 1.4932 - val_accuracy: 0.5974 Epoch 37/100 13/13 - 8s - loss: 0.5462 - accuracy: 0.8091 - val_loss: 1.6586 - val_accuracy: 0.5844 Epoch 38/100 13/13 - 8s - loss: 0.6367 - accuracy: 0.8120 - val_loss: 1.0842 - val_accuracy: 0.6558 Epoch 39/100 13/13 - 8s - loss: 0.5929 - accuracy: 0.7920 - val_loss: 1.3156 - val_accuracy: 0.5974 Epoch 40/100 13/13 - 8s - loss: 0.5755 - accuracy: 0.7863 - val_loss: 1.4598 - val_accuracy: 0.6104 Epoch 
41/100 13/13 - 8s - loss: 0.5909 - accuracy: 0.8148 - val_loss: 0.7869 - val_accuracy: 0.6688 Epoch 42/100 13/13 - 8s - loss: 0.4921 - accuracy: 0.8148 - val_loss: 0.8288 - val_accuracy: 0.7078 Epoch 43/100 13/13 - 8s - loss: 0.5049 - accuracy: 0.8433 - val_loss: 1.2436 - val_accuracy: 0.6234 Epoch 44/100 13/13 - 8s - loss: 0.5825 - accuracy: 0.8120 - val_loss: 0.8866 - val_accuracy: 0.7143 Epoch 45/100 13/13 - 8s - loss: 0.5683 - accuracy: 0.8034 - val_loss: 0.8035 - val_accuracy: 0.7273 Epoch 46/100 13/13 - 8s - loss: 0.5751 - accuracy: 0.8148 - val_loss: 0.5973 - val_accuracy: 0.7597 Epoch 47/100 13/13 - 8s - loss: 0.4859 - accuracy: 0.8234 - val_loss: 0.6246 - val_accuracy: 0.7468 Epoch 48/100 13/13 - 8s - loss: 0.6015 - accuracy: 0.8006 - val_loss: 0.9760 - val_accuracy: 0.6883 Epoch 49/100 13/13 - 8s - loss: 0.4944 - accuracy: 0.8376 - val_loss: 0.6023 - val_accuracy: 0.7987 Epoch 50/100 13/13 - 8s - loss: 0.4284 - accuracy: 0.8234 - val_loss: 0.6165 - val_accuracy: 0.7792 Epoch 51/100 13/13 - 8s - loss: 0.4680 - accuracy: 0.8604 - val_loss: 0.5851 - val_accuracy: 0.8117 Epoch 52/100 13/13 - 8s - loss: 0.3991 - accuracy: 0.8519 - val_loss: 0.5939 - val_accuracy: 0.7792 Epoch 53/100 13/13 - 8s - loss: 0.4156 - accuracy: 0.8405 - val_loss: 0.5825 - val_accuracy: 0.7987 Epoch 54/100 13/13 - 8s - loss: 0.4042 - accuracy: 0.8547 - val_loss: 0.7788 - val_accuracy: 0.7662 Epoch 55/100 13/13 - 8s - loss: 0.3082 - accuracy: 0.8946 - val_loss: 0.5777 - val_accuracy: 0.7857 Epoch 56/100 13/13 - 8s - loss: 0.3653 - accuracy: 0.8547 - val_loss: 0.6302 - val_accuracy: 0.7597 Epoch 57/100 13/13 - 8s - loss: 0.5239 - accuracy: 0.8148 - val_loss: 0.5046 - val_accuracy: 0.7922 Epoch 58/100 13/13 - 8s - loss: 0.3817 - accuracy: 0.8746 - val_loss: 0.7060 - val_accuracy: 0.7532 Epoch 59/100 13/13 - 8s - loss: 0.4844 - accuracy: 0.8462 - val_loss: 0.5534 - val_accuracy: 0.8117 Epoch 60/100 13/13 - 8s - loss: 0.4165 - accuracy: 0.8490 - val_loss: 0.5146 - val_accuracy: 0.8247 Epoch 
61/100 13/13 - 8s - loss: 0.3981 - accuracy: 0.8632 - val_loss: 0.7506 - val_accuracy: 0.7987 Epoch 62/100 13/13 - 8s - loss: 0.3615 - accuracy: 0.8832 - val_loss: 0.6676 - val_accuracy: 0.8182 Epoch 63/100 13/13 - 8s - loss: 0.4207 - accuracy: 0.8519 - val_loss: 0.6277 - val_accuracy: 0.7922 Epoch 64/100 13/13 - 8s - loss: 0.4383 - accuracy: 0.8604 - val_loss: 0.5845 - val_accuracy: 0.8442 Epoch 65/100 13/13 - 8s - loss: 0.4929 - accuracy: 0.8632 - val_loss: 0.4831 - val_accuracy: 0.8766 Epoch 66/100 13/13 - 8s - loss: 0.3983 - accuracy: 0.8775 - val_loss: 0.5409 - val_accuracy: 0.8312 Epoch 67/100 13/13 - 8s - loss: 0.4388 - accuracy: 0.8547 - val_loss: 0.4821 - val_accuracy: 0.8117 Epoch 68/100 13/13 - 8s - loss: 0.3647 - accuracy: 0.8661 - val_loss: 0.5857 - val_accuracy: 0.8182 Epoch 69/100 13/13 - 8s - loss: 0.2406 - accuracy: 0.9088 - val_loss: 0.6202 - val_accuracy: 0.7792 Epoch 70/100 13/13 - 8s - loss: 0.3055 - accuracy: 0.8860 - val_loss: 0.5919 - val_accuracy: 0.8312 Epoch 71/100 13/13 - 8s - loss: 0.4539 - accuracy: 0.8462 - val_loss: 0.5869 - val_accuracy: 0.8117 Epoch 72/100 13/13 - 8s - loss: 0.3533 - accuracy: 0.8832 - val_loss: 0.6626 - val_accuracy: 0.7792 Epoch 73/100 13/13 - 8s - loss: 0.3528 - accuracy: 0.8803 - val_loss: 0.4950 - val_accuracy: 0.8377 Epoch 74/100 13/13 - 8s - loss: 0.4046 - accuracy: 0.8661 - val_loss: 0.3567 - val_accuracy: 0.8896 Epoch 75/100 13/13 - 8s - loss: 0.3384 - accuracy: 0.8661 - val_loss: 0.3881 - val_accuracy: 0.8571 Epoch 76/100 13/13 - 8s - loss: 0.2693 - accuracy: 0.8860 - val_loss: 0.4756 - val_accuracy: 0.8701 Epoch 77/100 13/13 - 8s - loss: 0.4773 - accuracy: 0.8519 - val_loss: 0.4293 - val_accuracy: 0.8831 Epoch 78/100 13/13 - 8s - loss: 0.3626 - accuracy: 0.8775 - val_loss: 0.3786 - val_accuracy: 0.8636 Epoch 79/100 13/13 - 8s - loss: 0.2662 - accuracy: 0.9060 - val_loss: 0.7940 - val_accuracy: 0.7987 Epoch 80/100 13/13 - 8s - loss: 0.4402 - accuracy: 0.8604 - val_loss: 0.8449 - val_accuracy: 0.7727 Epoch 
81/100 13/13 - 8s - loss: 0.4506 - accuracy: 0.8604 - val_loss: 0.4964 - val_accuracy: 0.8506 Epoch 82/100 13/13 - 8s - loss: 0.3322 - accuracy: 0.8917 - val_loss: 0.3522 - val_accuracy: 0.8766 Epoch 83/100 13/13 - 8s - loss: 0.3132 - accuracy: 0.9003 - val_loss: 0.3728 - val_accuracy: 0.9026 Epoch 84/100 13/13 - 8s - loss: 0.2973 - accuracy: 0.8917 - val_loss: 0.5083 - val_accuracy: 0.8312 Epoch 85/100 13/13 - 8s - loss: 0.3179 - accuracy: 0.9031 - val_loss: 0.3842 - val_accuracy: 0.8442 Epoch 86/100 13/13 - 8s - loss: 0.3193 - accuracy: 0.8832 - val_loss: 0.5674 - val_accuracy: 0.8442 Epoch 87/100 13/13 - 8s - loss: 0.3406 - accuracy: 0.8889 - val_loss: 0.6354 - val_accuracy: 0.8377 Epoch 88/100 13/13 - 8s - loss: 0.2698 - accuracy: 0.9145 - val_loss: 0.3511 - val_accuracy: 0.8896 Epoch 89/100 13/13 - 8s - loss: 0.2661 - accuracy: 0.9174 - val_loss: 0.3365 - val_accuracy: 0.8896 Epoch 90/100 13/13 - 8s - loss: 0.3168 - accuracy: 0.9117 - val_loss: 0.3352 - val_accuracy: 0.8961 Epoch 91/100 13/13 - 8s - loss: 0.3481 - accuracy: 0.8974 - val_loss: 0.6436 - val_accuracy: 0.8247 Epoch 92/100 13/13 - 8s - loss: 0.2817 - accuracy: 0.8946 - val_loss: 0.5352 - val_accuracy: 0.8442 Epoch 93/100 13/13 - 8s - loss: 0.2776 - accuracy: 0.8974 - val_loss: 0.9550 - val_accuracy: 0.7792 Epoch 94/100 13/13 - 8s - loss: 0.2732 - accuracy: 0.9145 - val_loss: 0.4424 - val_accuracy: 0.8701 Epoch 95/100 13/13 - 8s - loss: 0.3137 - accuracy: 0.8946 - val_loss: 0.4633 - val_accuracy: 0.8571 Epoch 96/100 13/13 - 8s - loss: 0.3750 - accuracy: 0.8917 - val_loss: 0.6258 - val_accuracy: 0.8312 Epoch 97/100 13/13 - 8s - loss: 0.4664 - accuracy: 0.8575 - val_loss: 0.5990 - val_accuracy: 0.8506 Epoch 98/100 13/13 - 8s - loss: 0.2761 - accuracy: 0.9174 - val_loss: 0.4384 - val_accuracy: 0.8961 Epoch 99/100 13/13 - 8s - loss: 0.2117 - accuracy: 0.9231 - val_loss: 0.6354 - val_accuracy: 0.8442 Epoch 100/100 13/13 - 8s - loss: 0.2656 - accuracy: 0.9231 - val_loss: 0.4924 - val_accuracy: 0.8571 Wall 
time: 13min 17s
model.save(join_path(project_path, 'model_1.h5'))
def process_history(history):
    """Reshape a Keras History object into a long-format DataFrame with
    columns: epoch, metric ('loss'/'accuracy'), value, and dataset
    ('training'/'validation').
    """
    frame = pd.DataFrame(history.history)
    frame['epoch'] = np.arange(1, len(frame) + 1)
    long_df = frame.melt(id_vars=['epoch'], var_name='metric')
    # Metrics prefixed with 'val' come from the validation set.
    is_validation = long_df['metric'].str.contains('val')
    long_df['dataset'] = np.where(is_validation, 'validation', 'training')
    # Collapse 'loss'/'val_loss' -> 'loss' and 'accuracy'/'val_accuracy' -> 'accuracy'.
    long_df.loc[long_df['metric'].str.contains('loss'), 'metric'] = 'loss'
    long_df.loc[long_df['metric'].str.contains('accuracy'), 'metric'] = 'accuracy'
    return long_df
def append_history(new_history, old_history_df, old_history=False):
    """Convert `new_history` to long format and, when `old_history` is True,
    continue epoch numbering from `old_history_df` and stack the two frames.

    Fix: removed the duplicated (unreachable) second `return` statement.
    """
    new_history_df = process_history(new_history)
    if old_history:
        # Continue epoch numbering from where the previous run stopped.
        new_history_df['epoch'] = new_history_df['epoch'] + old_history_df['epoch'].max()
        new_history_df = pd.concat([old_history_df, new_history_df], axis=0)
    return new_history_df
# NOTE(review): `del(history_df)` raises NameError when history_df is not yet
# defined, and after a successful delete the `if` branch below can never be
# taken -- this only makes sense across repeated notebook runs with this line
# toggled on/off; confirm intent.
del(history_df)
if 'history_df' in globals():
    # Append to the previous run's history, continuing the epoch numbering.
    history_df = append_history(new_history=history,
                                old_history_df=history_df,
                                old_history=True)
else:
    history_df = append_history(new_history=history,
                                old_history_df=False)
#history_df = append_history(new_history=history,
#                            old_history_df=False)
#t = history_df.loc[history_df['epoch']>50]
t = history_df.copy()
# Long format lets seaborn facet by metric (rows) and color by dataset.
sns.relplot(data=t, x='epoch', y='value', row='metric', hue='dataset',
            kind='line', aspect=3, height=3,
            facet_kws={'sharey': False, 'legend_out': False})
plt.show()
# Evaluation generators: rescale only (no augmentation) and shuffle=False so
# that predictions line up with generator.labels / generator.filepaths.
eval_train_datagen = ImageDataGenerator(rescale=1/255)
eval_validation_datagen = ImageDataGenerator(rescale=1/255)
eval_train = eval_train_datagen.flow_from_directory(
    training_path,
    target_size=target_shape,
    batch_size=24,
    class_mode='categorical',
    shuffle=False)
eval_validation = eval_validation_datagen.flow_from_directory(
    validation_path,
    target_size=target_shape,
    batch_size=12,
    class_mode='categorical',
    shuffle=False)
# evaluate() returns [loss, accuracy]; round for display.
train_score = np.round(model.evaluate(eval_train, verbose=3), 2)
val_score = np.round(model.evaluate(eval_validation, verbose=3), 2)
print('\n')
print(f'Train_score: loss = {train_score[0]}, accuracy = {train_score[1]}')
print(f'Validation_score: loss = {val_score[0]}, accuracy = {val_score[1]}')
Found 351 images belonging to 7 classes. Found 154 images belonging to 7 classes. Train_score: loss = 0.07, accuracy = 0.98 Validation_score: loss = 0.49, accuracy = 0.86
def predictons_df(image_generator, model):
    """Build a per-image prediction report: one probability column per class
    (rounded to 2 dp), plus correct_pred (1/0), pred_proba (top probability),
    file_path, and the true correct_label.

    NOTE(review): the name keeps the original typo ('predictons') because
    other code in this file calls it.
    Fix: model.predict() is called once and reused -- the original called it
    three times, tripling inference cost (and risking inconsistent rows if
    the generator were shuffled).
    """
    class_names = image_generator.class_indices.keys()
    probabilities = model.predict(image_generator)
    proba_df = pd.DataFrame(probabilities, columns=class_names)
    predicted = np.argmax(probabilities, axis=1)
    proba_df['correct_pred'] = (predicted == image_generator.labels).astype(np.uint8)
    proba_df['pred_proba'] = probabilities.max(axis=1)
    proba_df['file_path'] = pd.Series(image_generator.filepaths).astype('string')
    # Invert class_indices ({name: index}) to map integer labels back to names.
    labels_dict = {v: k for k, v in image_generator.class_indices.items()}
    proba_df['correct_label'] = pd.Series(image_generator.labels).map(labels_dict)
    proba_df.loc[:, class_names] = proba_df.loc[:, class_names].round(2)
    proba_df.loc[:, 'pred_proba'] = proba_df.loc[:, 'pred_proba'].round(2)
    return proba_df
df = predictons_df(eval_train, model)
def plot_missclassified(image_generator, model, height=20, missclass=True):
    """Plot images (misclassified only, by default) in a 3-column grid with a
    per-class probability annotation next to each image: green marks the true
    label, red the top-probability (predicted) label, white all others.
    Returns the flattened array of matplotlib axes.
    """
    if missclass:
        # Keep only the rows the model got wrong.
        pred_df = predictons_df(image_generator, model) \
            .loc[lambda df: df['correct_pred']==0]
    else:
        pred_df = predictons_df(image_generator, model)
    columns = 3
    rows = int(np.ceil(pred_df.shape[0] / columns))
    width = 14
    #height = (70 / 14) * width
    fig, axs = plt.subplots(nrows=rows, ncols=columns, figsize=(width, height))
    axs = axs.flatten()
    for ax, example in enumerate(pred_df.iterrows()):
        fp = example[1]['file_path']
        axs[ax].imshow(mpimg.imread(fp))
        axs[ax].axis('off')
        # The probability columns are the ones named after the class labels.
        labels = pred_df.columns[pred_df.columns.isin(classes)]
        #return example[1][labels]
        #break
        # One 'label: proba' annotation per class, stacked 20 units apart,
        # placed just right of the 256-pixel-wide image.
        for label, proba, y in zip(labels, example[1][labels], range(0, 20*len(labels), 20)):
            #print(example[0])
            #print(pred_df.loc[example[1]])
            if label==pred_df.loc[example[0], 'correct_label']:
                color = 'green'
            elif proba==pred_df.loc[example[0], 'pred_proba']:
                color = 'red'
            else:
                color = 'white'
            axs[ax].annotate(f'\n{label}:', [256, 12 + y],
                             color=color, size=15.5, weight=240)
            axs[ax].annotate(f'{proba:21.2f}', [256, 12 + y],
                             color=color, size=15.5, weight=240)
    # NOTE(review): this loop has no effect (its body is `pass`) -- leftover
    # debugging code.
    for row in pred_df.iterrows():
        for label in labels: pass
        #print(label, row[1][label])
    # Hide any unused axes at the end of the grid.
    for ax in axs: ax.axis('off')
    plt.tight_layout()
    return axs
axs = plot_missclassified(eval_train, model, height=10)
axs = plot_missclassified(eval_validation, model, height=26)
# Reload the saved model for activation-map inspection with keract.
m = load_model(join_path(project_path, 'model_1.h5'))
# all_images to np.array
# NOTE(review): batches are drawn from eval_validation but the batch count
# comes from eval_train.__len__() -- if the train generator has more batches
# than validation this wraps around the validation generator; confirm intent.
images = np.concatenate([eval_validation.next()[0] for i in range(eval_train.__len__())])
#show image to use activations
img = images[120]
fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(img)
ax.axis('off')
plt.tight_layout()
plt.show()
# Add a leading batch dimension before feeding the model.
img = np.expand_dims(img, 0)
layer = 'max_pooling2d'
conv_layers = ['conv2d', 'conv2d_1', 'conv2d_2', 'conv2d_3']
pooling_layers = ['max_pooling2d', 'max_pooling2d_2', 'max_pooling2d_3']
# Visualize the feature maps of every convolutional layer for this image.
activation = keract.get_activations(m, img, layer_names=conv_layers)
keract.display_activations(activation)
conv2d (1, 254, 254, 64)
conv2d_1 (1, 125, 125, 128)
conv2d_2 (1, 60, 60, 256)
conv2d_3 (1, 28, 28, 512)